var runtime.mheap_
222 uses
runtime (current package)
arena.go#L661: span = mheap_.allocUserArenaChunk()
arena.go#L816: lock(&mheap_.lock)
arena.go#L817: mheap_.userArena.quarantineList.insert(s)
arena.go#L818: unlock(&mheap_.lock)
heapdump.go#L454: for _, s := range mheap_.allspans {
heapdump.go#L480: for _, s := range mheap_.allspans {
heapdump.go#L517: for i1 := range mheap_.arenas {
heapdump.go#L518: if mheap_.arenas[i1] == nil {
heapdump.go#L521: for i, ha := range mheap_.arenas[i1] {
heapdump.go#L652: for _, s := range mheap_.allspans {
heapdump.go#L675: for _, s := range mheap_.allspans {
malloc.go#L435: mheap_.init()
malloc.go#L517: hintList := &mheap_.arenaHints
malloc.go#L519: hintList = &mheap_.userArena.arenaHints
malloc.go#L521: hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
malloc.go#L546: mheap_.heapArenaAlloc.init(meta, arenaMetaSize, true)
malloc.go#L569: if mheap_.heapArenaAlloc.next <= p && p < mheap_.heapArenaAlloc.end {
malloc.go#L570: p = mheap_.heapArenaAlloc.end
malloc.go#L583: mheap_.arena.init(uintptr(a), size, false)
malloc.go#L584: p = mheap_.arena.end // For hint below
malloc.go#L588: hint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
malloc.go#L590: hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
malloc.go#L596: userArenaHint := (*arenaHint)(mheap_.arenaHintAlloc.alloc())
malloc.go#L598: userArenaHint.next, mheap_.userArena.arenaHints = mheap_.userArena.arenaHints, userArenaHint
malloc.go#L697: hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
malloc.go#L700: hint.next, mheap_.arenaHints = mheap_.arenaHints, hint
mbitmap.go#L413: ha := mheap_.arenas[ai.l1()][ai.l2()]
mbitmap.go#L812: ha := mheap_.arenas[ai.l1()][ai.l2()]
mbitmap.go#L865: ha := mheap_.arenas[ai.l1()][ai.l2()]
mbitmap.go#L893: ha := mheap_.arenas[ai.l1()][ai.l2()]
mbitmap.go#L1352: s := mheap_.allocManual(pages, spanAllocPtrScalarBits)
mbitmap.go#L1357: mheap_.freeManual(s, spanAllocPtrScalarBits)
mcache.go#L88: lock(&mheap_.lock)
mcache.go#L89: c = (*mcache)(mheap_.cachealloc.alloc())
mcache.go#L90: c.flushGen.Store(mheap_.sweepgen)
mcache.go#L91: unlock(&mheap_.lock)
mcache.go#L116: lock(&mheap_.lock)
mcache.go#L117: mheap_.cachealloc.free(unsafe.Pointer(c))
mcache.go#L118: unlock(&mheap_.lock)
mcache.go#L156: if s.sweepgen != mheap_.sweepgen+3 {
mcache.go#L159: mheap_.central[spc].mcentral.uncacheSpan(s)
mcache.go#L182: s = mheap_.central[spc].mcentral.cacheSpan()
mcache.go#L193: s.sweepgen = mheap_.sweepgen + 3
mcache.go#L234: s := mheap_.alloc(npages, spc)
mcache.go#L253: mheap_.central[spc].mcentral.fullSwept(mheap_.sweepgen).push(s)
mcache.go#L264: sg := mheap_.sweepgen
mcache.go#L291: mheap_.central[i].mcentral.uncacheSpan(s)
mcache.go#L320: sg := mheap_.sweepgen
mcache.go#L330: c.flushGen.Store(mheap_.sweepgen) // Synchronizes with gcStart
mcentral.go#L110: sg := mheap_.sweepgen
mcentral.go#L201: sg := mheap_.sweepgen
mcentral.go#L246: s := mheap_.alloc(npages, c.spanclass)
mcheckmark.go#L42: for _, ai := range mheap_.allArenas {
mcheckmark.go#L43: arena := mheap_.arenas[ai.l1()][ai.l2()]
mcheckmark.go#L92: arena := mheap_.arenas[ai.l1()][ai.l2()]
metrics.go#L649: lock(&mheap_.lock)
metrics.go#L651: a.mSpanInUse = uint64(mheap_.spanalloc.inuse)
metrics.go#L653: a.mCacheInUse = uint64(mheap_.cachealloc.inuse)
metrics.go#L654: unlock(&mheap_.lock)
mgc.go#L661: if fg := p.mcache.flushGen.Load(); fg != mheap_.sweepgen {
mgc.go#L662: println("runtime: p", p.id, "flushGen", fg, "!= sweepgen", mheap_.sweepgen)
mgc.go#L1068: mheap_.pages.scav.index.nextGen()
mgc.go#L1118: lock(&mheap_.lock)
mgc.go#L1119: pp.pcache.flush(&mheap_.pages)
mgc.go#L1120: unlock(&mheap_.lock)
mgc.go#L1190: mheap_.enableMetadataHugePages()
mgc.go#L1553: lock(&mheap_.lock)
mgc.go#L1554: mheap_.sweepgen += 2
mgc.go#L1556: mheap_.pagesSwept.Store(0)
mgc.go#L1557: mheap_.sweepArenas = mheap_.allArenas
mgc.go#L1558: mheap_.reclaimIndex.Store(0)
mgc.go#L1559: mheap_.reclaimCredit.Store(0)
mgc.go#L1560: unlock(&mheap_.lock)
mgc.go#L1567: lock(&mheap_.lock)
mgc.go#L1568: mheap_.sweepPagesPerByte = 0
mgc.go#L1569: unlock(&mheap_.lock)
mgc.go#L1615: lock(&mheap_.lock)
mgc.go#L1616: arenas := mheap_.allArenas
mgc.go#L1617: unlock(&mheap_.lock)
mgc.go#L1619: ha := mheap_.arenas[ai.l1()][ai.l2()]
mgc.go#L1756: lock(&mheap_.speciallock)
mgc.go#L1757: s := (*specialReachable)(mheap_.specialReachableAlloc.alloc())
mgc.go#L1758: unlock(&mheap_.speciallock)
mgc.go#L1783: lock(&mheap_.speciallock)
mgc.go#L1784: mheap_.specialReachableAlloc.free(unsafe.Pointer(s))
mgc.go#L1785: unlock(&mheap_.speciallock)
mgcmark.go#L96: mheap_.markArenas = mheap_.allArenas[:len(mheap_.allArenas):len(mheap_.allArenas)]
mgcmark.go#L97: work.nSpanRoots = len(mheap_.markArenas) * (pagesPerArena / pagesPerSpanRoot)
mgcmark.go#L333: sg := mheap_.sweepgen
mgcmark.go#L336: ai := mheap_.markArenas[shard/(pagesPerArena/pagesPerSpanRoot)]
mgcmark.go#L337: ha := mheap_.arenas[ai.l1()][ai.l2()]
mgcpacer.go#L1185: assertWorldStoppedOrLockHeld(&mheap_.lock)
mgcpacer.go#L1245: assertWorldStoppedOrLockHeld(&mheap_.lock)
mgcpacer.go#L1262: lock(&mheap_.lock)
mgcpacer.go#L1265: unlock(&mheap_.lock)
mgcpacer.go#L1294: assertWorldStoppedOrLockHeld(&mheap_.lock)
mgcpacer.go#L1309: lock(&mheap_.lock)
mgcpacer.go#L1314: unlock(&mheap_.lock)
mgcpacer.go#L1318: unlock(&mheap_.lock)
mgcpacer.go#L1426: assertWorldStoppedOrLockHeld(&mheap_.lock)
mgcscavenge.go#L168: assertWorldStoppedOrLockHeld(&mheap_.lock)
mgcscavenge.go#L395: r := mheap_.pages.scavenge(n, nil, false)
mgcscavenge.go#L661: mheap_.pages.scav.releasedBg.Add(released)
mgcsweep.go#L155: return sweepLocker{mheap_.sweepgen, false}
mgcsweep.go#L158: return sweepLocker{mheap_.sweepgen, true}
mgcsweep.go#L166: if sl.sweepGen != mheap_.sweepgen {
mgcsweep.go#L180: print("pacer: sweep done at heap size ", live>>20, "MB; allocated ", (live-mheap_.sweepHeapLiveBasis)>>20, "MB during sweep; swept ", mheap_.pagesSwept.Load(), " pages at ", mheap_.sweepPagesPerByte, " pages/byte\n")
mgcsweep.go#L256: sg := mheap_.sweepgen
mgcsweep.go#L257: for i := range mheap_.central {
mgcsweep.go#L258: c := &mheap_.central[i].mcentral
mgcsweep.go#L375: s := mheap_.nextSpanForSweep()
mgcsweep.go#L397: mheap_.reclaimCredit.Add(npages)
mgcsweep.go#L427: lock(&mheap_.lock)
mgcsweep.go#L430: releasedBg := mheap_.pages.scav.releasedBg.Load()
mgcsweep.go#L431: releasedEager := mheap_.pages.scav.releasedEager.Load()
mgcsweep.go#L437: mheap_.pages.scav.releasedBg.Add(-releasedBg)
mgcsweep.go#L438: mheap_.pages.scav.releasedEager.Add(-releasedEager)
mgcsweep.go#L439: unlock(&mheap_.lock)
mgcsweep.go#L517: sweepgen := mheap_.sweepgen
mgcsweep.go#L527: mheap_.pagesSwept.Add(int64(s.npages))
mgcsweep.go#L711: mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
mgcsweep.go#L717: mheap_.pagesInUse.Add(-s.npages)
mgcsweep.go#L725: if s.list != &mheap_.userArena.quarantineList {
mgcsweep.go#L728: lock(&mheap_.lock)
mgcsweep.go#L729: mheap_.userArena.quarantineList.remove(s)
mgcsweep.go#L730: mheap_.userArena.readyList.insert(s)
mgcsweep.go#L731: unlock(&mheap_.lock)
mgcsweep.go#L759: mheap_.freeSpan(s)
mgcsweep.go#L764: mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
mgcsweep.go#L766: mheap_.central[spc].mcentral.partialSwept(sweepgen).push(s)
mgcsweep.go#L792: mheap_.freeSpan(s)
mgcsweep.go#L808: mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s)
mgcsweep.go#L882: if mheap_.sweepPagesPerByte == 0 {
mgcsweep.go#L893: sweptBasis := mheap_.pagesSweptBasis.Load()
mgcsweep.go#L895: liveBasis := mheap_.sweepHeapLiveBasis
mgcsweep.go#L915: pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
mgcsweep.go#L916: for pagesTarget > int64(mheap_.pagesSwept.Load()-sweptBasis) {
mgcsweep.go#L918: mheap_.sweepPagesPerByte = 0
mgcsweep.go#L921: if mheap_.pagesSweptBasis.Load() != sweptBasis {
mgcsweep.go#L947: assertWorldStoppedOrLockHeld(&mheap_.lock)
mgcsweep.go#L951: mheap_.sweepPagesPerByte = 0
mgcsweep.go#L968: pagesSwept := mheap_.pagesSwept.Load()
mgcsweep.go#L969: pagesInUse := mheap_.pagesInUse.Load()
mgcsweep.go#L972: mheap_.sweepPagesPerByte = 0
mgcsweep.go#L974: mheap_.sweepPagesPerByte = float64(sweepDistancePages) / float64(heapDistance)
mgcsweep.go#L975: mheap_.sweepHeapLiveBasis = heapLiveBasis
mgcsweep.go#L979: mheap_.pagesSweptBasis.Store(pagesSwept)
mgcwork.go#L119: lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
mgcwork.go#L363: lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
mgcwork.go#L378: s = mheap_.allocManual(workbufAlloc/pageSize, spanAllocWorkBuf)
mgcwork.go#L483: mheap_.freeManual(span, spanAllocWorkBuf)
mheap.go#L233: var mheap_ mheap
mheap.go#L691: if ri.l2() >= uint(len(mheap_.arenas[0])) {
mheap.go#L696: if ri.l1() >= uint(len(mheap_.arenas)) {
mheap.go#L700: l2 := mheap_.arenas[ri.l1()]
mheap.go#L719: return mheap_.arenas[ai.l1()][ai.l2()].spans[(p/pageSize)%pagesPerArena]
mheap.go#L745: arena = mheap_.arenas[ai.l1()][ai.l2()]
mheap.go#L1333: mheap_.pages.scav.releasedEager.Add(released)
mheap.go#L1673: systemstack(func() { mheap_.scavengeAll() })
mheap.go#L1820: ha := mheap_.arenas[ai.l1()][ai.l2()]
mheap.go#L1828: ha := mheap_.arenas[ai.l1()][ai.l2()]
mheap.go#L1943: lock(&mheap_.speciallock)
mheap.go#L1944: s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc())
mheap.go#L1945: unlock(&mheap_.speciallock)
mheap.go#L1974: lock(&mheap_.speciallock)
mheap.go#L1975: mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
mheap.go#L1976: unlock(&mheap_.speciallock)
mheap.go#L1986: lock(&mheap_.speciallock)
mheap.go#L1987: mheap_.specialfinalizeralloc.free(unsafe.Pointer(s))
mheap.go#L1988: unlock(&mheap_.speciallock)
mheap.go#L2000: lock(&mheap_.speciallock)
mheap.go#L2001: s := (*specialprofile)(mheap_.specialprofilealloc.alloc())
mheap.go#L2002: unlock(&mheap_.speciallock)
mheap.go#L2059: lock(&mheap_.speciallock)
mheap.go#L2060: mheap_.specialfinalizeralloc.free(unsafe.Pointer(sf))
mheap.go#L2061: unlock(&mheap_.speciallock)
mheap.go#L2065: lock(&mheap_.speciallock)
mheap.go#L2066: mheap_.specialprofilealloc.free(unsafe.Pointer(sp))
mheap.go#L2067: unlock(&mheap_.speciallock)
mheap.go#L2073: lock(&mheap_.speciallock)
mheap.go#L2074: mheap_.specialPinCounterAlloc.free(unsafe.Pointer(s))
mheap.go#L2075: unlock(&mheap_.speciallock)
mpagealloc.go#L452: lock(&mheap_.lock)
mpagealloc.go#L454: unlock(&mheap_.lock)
mpagealloc.go#L461: unlock(&mheap_.lock)
mpagealloc.go#L620: if p.test || mheap_.arenas[ai.l1()] == nil || mheap_.arenas[ai.l1()][ai.l2()] == nil {
mstats.go#L529: stats.MSpanInuse = uint64(mheap_.spanalloc.inuse)
mstats.go#L531: stats.MCacheInuse = uint64(mheap_.cachealloc.inuse)
mstats.go#L575: lock(&mheap_.lock)
mstats.go#L596: unlock(&mheap_.lock)
panic.go#L1239: if mheap_.cachealloc.size == 0 { // very early
pinner.go#L327: lock(&mheap_.speciallock)
pinner.go#L328: rec = (*specialPinCounter)(mheap_.specialPinCounterAlloc.alloc())
pinner.go#L329: unlock(&mheap_.speciallock)
pinner.go#L356: lock(&mheap_.speciallock)
pinner.go#L357: mheap_.specialPinCounterAlloc.free(unsafe.Pointer(counter))
pinner.go#L358: unlock(&mheap_.speciallock)
proc.go#L5141: mheap_.spanalloc.free(unsafe.Pointer(pp.mspancache.buf[i]))
proc.go#L5144: lock(&mheap_.lock)
proc.go#L5145: pp.pcache.flush(&mheap_.pages)
proc.go#L5146: unlock(&mheap_.lock)
stack.go#L196: lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
stack.go#L199: s = mheap_.allocManual(_StackCacheSize>>_PageShift, spanAllocStack)
stack.go#L263: mheap_.freeManual(s, spanAllocStack)
stack.go#L404: lockWithRankMayAcquire(&mheap_.lock, lockRankMheap)
stack.go#L408: s = mheap_.allocManual(npage, spanAllocStack)
stack.go#L498: mheap_.freeManual(s, spanAllocStack)
stack.go#L1233: mheap_.freeManual(s, spanAllocStack)
stack.go#L1247: mheap_.freeManual(s, spanAllocStack)
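
Read together, the references above fall into a few recurring patterns: most mutable heap state is touched only between lock(&mheap_.lock) and unlock(&mheap_.lock); the fixed-size allocators for specials (specialfinalizeralloc, specialprofilealloc, specialPinCounterAlloc, specialReachableAlloc) are guarded by the narrower mheap_.speciallock; and the sweep generation counter mheap_.sweepgen is advanced by 2 under the heap lock (mgc.go#L1553-L1554). The sketch below is not runtime code: it is a minimal, self-contained analogy that uses sync.Mutex in place of the runtime's internal lock/unlock, with hypothetical names (heapState, heap_, startSweepCycle, allocSpecial) standing in for the real ones, to illustrate the same discipline of one coarse lock on a package-level singleton plus a separate lock for a side pool.

    package main

    import (
        "fmt"
        "sync"
    )

    // heapState is a hypothetical stand-in for the runtime's package-level
    // mheap_ singleton: one coarse lock guards most mutable fields, while a
    // separate, narrower lock guards a side pool of "special" records.
    type heapState struct {
        lock     sync.Mutex // analogous to mheap_.lock
        sweepgen uint32     // analogous to mheap_.sweepgen

        speciallock sync.Mutex // analogous to mheap_.speciallock
        specialPool []*int     // stand-in for the mheap_.special*alloc pools
    }

    // heap_ mirrors the "var mheap_ mheap" declaration at mheap.go#L233.
    var heap_ heapState

    // startSweepCycle mimics bumping sweepgen under the heap lock
    // (see mgc.go#L1553-L1560 in the listing above).
    func startSweepCycle() {
        heap_.lock.Lock()
        heap_.sweepgen += 2
        heap_.lock.Unlock()
    }

    // allocSpecial mimics the speciallock pattern used for finalizer,
    // profile, and pin-counter specials (e.g. mheap.go#L1943-L1945).
    func allocSpecial() *int {
        heap_.speciallock.Lock()
        defer heap_.speciallock.Unlock()
        if n := len(heap_.specialPool); n > 0 {
            s := heap_.specialPool[n-1]
            heap_.specialPool = heap_.specialPool[:n-1]
            return s
        }
        return new(int)
    }

    func main() {
        startSweepCycle()
        s := allocSpecial()
        fmt.Println("sweepgen:", heap_.sweepgen, "special:", *s)
    }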
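
The mgcsweep.go entries also expose the proportional sweep-pacing arithmetic: mheap_.sweepPagesPerByte is computed as sweepDistancePages / heapDistance (mgcsweep.go#L974), and an allocating goroutine then owes roughly sweepPagesPerByte * newHeapLive - callerSweepPages pages of sweeping (mgcsweep.go#L915). The snippet below only works that ratio through; the variable names mirror the listing, but the numeric values are made-up assumptions for illustration, not real runtime state.

    package main

    import "fmt"

    // Illustrative pacing computation modeled on mgcsweep.go#L974 and #L915.
    // All inputs are example values chosen for a round result.
    func main() {
        heapDistance := int64(64 << 20)   // example: 64 MiB of allocation headroom before the next GC
        sweepDistancePages := int64(8192) // example: pages still to be swept this cycle

        // mheap_.sweepPagesPerByte = sweepDistancePages / heapDistance
        sweepPagesPerByte := float64(sweepDistancePages) / float64(heapDistance)

        // After allocating 1 MiB past the basis, the caller owes this many swept pages.
        newHeapLive := int64(1 << 20)
        pagesTarget := int64(sweepPagesPerByte * float64(newHeapLive))

        fmt.Printf("sweepPagesPerByte = %.6f\n", sweepPagesPerByte)      // ~0.000122
        fmt.Printf("pagesTarget after 1 MiB = %d pages\n", pagesTarget) // 128
    }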
The pages are generated with Golds v0.6.7. (GOOS=linux GOARCH=amd64)
Golds is a Go 101 project developed by Tapir Liu.